5.0 10X Genomics PBMC 3K Dataset

In [24]:
# Import Network explicitly instead of star-importing: avoids namespace
# pollution and makes the origin of `Network` obvious to readers.
from clustergrammer import Network

import clustergrammer_widget2
import clustergrammer_groupby as cby
import gene_exp_10x

net = Network()  # clustergrammer network object reused throughout the notebook
df = {}          # NOTE(review): rebound to a DataFrame below — confirm intended
In [25]:
from sklearn.metrics import f1_score
import pandas as pd
import numpy as np
from copy import deepcopy

import matplotlib.pyplot as plt
%matplotlib inline 

Load Data

In [26]:
# Load the 10x Genomics PBMC-3k expression matrix (genes x cell barcodes).
# NOTE(review): this rebinds `df` (initialized as a dict above) to a DataFrame;
# later cells index df['ge'], df['bct-sig'], ... as if it were still a dict of
# DataFrames — confirm which representation is intended.
df = gene_exp_10x.load_gene_exp_to_df('../data/pbmc3k_filtered_gene_bc_matrices/hg19/')
df.shape
Out[26]:
(32738, 2700)
In [27]:
# Remove ribosomal (RPL/RPS) and mitochondrial genes before downstream analysis.
all_genes = df.index.tolist()
print(len(all_genes))

# Drop ribosomal protein genes (any name containing RPL or RPS)
keep_genes = [x for x in all_genes if 'RPL' not in x and 'RPS' not in x]
print(len(keep_genes))

df = df.loc[keep_genes]
df.shape

# Removing Mitochondrial Genes
list_mito_genes = ['MTRNR2L11', 'MTRF1', 'MTRNR2L12', 'MTRNR2L13', 'MTRF1L', 'MTRNR2L6', 'MTRNR2L7',
                'MTRNR2L10', 'MTRNR2L8', 'MTRNR2L5', 'MTRNR2L1', 'MTRNR2L3', 'MTRNR2L4']
mito_name_set = set(list_mito_genes)  # set for O(1) membership tests

all_genes = df.index.tolist()
# 'MT-' prefixed genes plus the explicit list above; the '_' suffix (added to
# disambiguate duplicate gene names) is stripped before the lookup.
mito_genes = [x for x in all_genes
              if x.startswith('MT-') or x.split('_')[0] in mito_name_set]
print(mito_genes)

mito_set = set(mito_genes)
keep_genes = [x for x in all_genes if x not in mito_set]
# .ix is deprecated (see the DeprecationWarning in the captured output) and was
# removed in pandas 1.0; .loc is the label-based equivalent.
df = df.loc[keep_genes]

# # normalize by UMI count
# barcode_umi_sum = df['ge'].sum()
# df['ge'] = df['ge'].div(barcode_umi_sum)
32738
32546
['MTRNR2L11', 'MTRNR2L12', 'MTRNR2L13', 'MTRF1L', 'MTRNR2L6', 'MTRNR2L10', 'MTRNR2L7', 'MTRNR2L5', 'MTRNR2L8', 'MTRF1', 'MTRNR2L4', 'MTRNR2L1', 'MTRNR2L3', 'MT-ND1', 'MT-ND2', 'MT-CO1', 'MT-CO2', 'MT-ATP8', 'MT-ATP6', 'MT-CO3', 'MT-ND3', 'MT-ND4L', 'MT-ND4', 'MT-ND5', 'MT-ND6', 'MT-CYB']
/Users/nickfernandez/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:20: DeprecationWarning: 
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing

See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
In [28]:
def calc_mean_var_disp(df_inst):
    """Compute per-gene (row-wise) mean and variance of an expression matrix.

    Parameters
    ----------
    df_inst : pd.DataFrame
        Expression matrix with genes as rows and cells as columns.

    Returns
    -------
    tuple of (pd.Series, pd.Series)
        Row means and row sample variances (ddof=1, the pandas default),
        both indexed by gene name in the original row order.
    """
    # Vectorized row-wise reductions replace the original per-row Python loop
    # (one .loc lookup per gene); values and index are identical, but this is
    # dramatically faster on thousands of genes.
    ser_mean = df_inst.mean(axis=1)
    ser_var = df_inst.var(axis=1)
    return ser_mean, ser_var
In [29]:
# Keep only genes whose mean expression exceeds a minimum threshold.
min_mean = 0.02
ser_mean, ser_var = calc_mean_var_disp(df)
expressed_mask = ser_mean > min_mean
keep_genes = ser_mean.index[expressed_mask].tolist()
df = df.loc[keep_genes]
print(df.shape)
(6844, 2700)

Filter by Dispersion

In [30]:
# Target sizes for the filtered dataset
num_genes = 1000  # top genes kept by dispersion
num_cells = 1000  # cells randomly sampled for visualization
In [31]:
# Rank genes by dispersion (variance / mean) and keep the most variable ones.
ser_mean, ser_var = calc_mean_var_disp(df)
ser_disp = (ser_var / ser_mean).sort_values(ascending=False)
keep_genes = ser_disp.head(num_genes).index.tolist()

Arcsinh Transform

In [32]:
df = df.loc[keep_genes]
df.shape  # NOTE(review): not the last expression in the cell, so nothing is displayed
# arcsinh compresses large values like a log transform but is defined at zero;
# dividing by 5 softens the transform for UMI-scale counts
df = np.arcsinh(df/5)

Z-score and Subset of Data

In [33]:
# Z-score each gene across cells, then randomly subsample cells
# (random_state pins the sample for reproducibility).
net.load_df(df)
net.normalize(axis='row', norm_type='zscore')
net.swap_nan_for_zero()
net.random_sample(axis='col', num_samples=num_cells, random_state=99)
df = net.export_df()
df.shape
Out[33]:
(1000, 1000)

Visualize Data in Clustergrammer-Widget2

In [34]:
# Cluster the z-scored subsample and render it in the widget.
net.load_df(df)
net.clip(lower=-5, upper=5)  # clip extreme z-scores so the colormap isn't dominated by outliers
net.cluster()
net_json = net.export_viz_to_widget()
w = clustergrammer_widget2.ExampleWidget(network=net_json)
w
In [ ]:
 

Visualize Original Dataset

In [ ]:
# NOTE(review): `df` was rebound to a DataFrame above, so df['ge'] selects a
# column named 'ge' (likely KeyError on a fresh run). Earlier revisions
# apparently kept a dict of DataFrames under keys like 'ge'/'ge-z' — confirm
# which representation this cell expects.
net.load_df(df['ge'])
net.filter_N_top(inst_rc='row', N_top=250, rank_type='var')
net.normalize(axis='row', norm_type='zscore')
net.random_sample(axis='col', num_samples=250, random_state=99)
net.clip(lower=-5, upper=5)
net.cluster()
net.widget()

Load NM3337 gene signatures

In [ ]:
# Load broad and narrow cell-type signature matrices (NM3337) and stash them
# under keys of `df` (dict-of-DataFrames pattern — see note above).
net.load_file('../data/cell_type_signatures/nm3337_broad_cell_type_sigs.txt')
df['bct-sig'] = net.export_df()
print(df['bct-sig'].shape)

net.load_file('../data/cell_type_signatures/nm3337_narrow_cell_type_sigs.txt')
df['nct-sig'] = net.export_df()
print(df['nct-sig'].shape)
In [ ]:
# Visualize the narrow cell-type signatures.
net.load_df(df['nct-sig'])
net.cluster()
net.widget()
In [ ]:
# Signature row names carry a '_' suffix; strip it and check for collisions.
sig_rows = df['bct-sig'].index.tolist()
clean_sig_rows = [x.split('_')[0] for x in sig_rows]
print(len(clean_sig_rows), len(list(set(clean_sig_rows))))
In [ ]:
# Same check for the expression-matrix gene names.
ge_rows = df['ge'].index.tolist()
clean_ge_rows = [x.split('_')[0] for x in ge_rows]
print(len(ge_rows), len(list(set(clean_ge_rows))))
In [ ]:
ser_ge_rows = pd.Series(clean_ge_rows)
In [ ]:
# Genes whose cleaned name appears more than once need disambiguating suffixes.
gene_name_count = ser_ge_rows.value_counts(ascending=False)
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
len(duplicate_genes)

Add a unique numeric suffix only to duplicated gene names

In [ ]:
# Append a numeric suffix ('_1', '_2', ...) to each occurrence of a duplicated
# gene name so that every row label becomes unique; non-duplicated names pass
# through unchanged.
dup_set = set(duplicate_genes)  # O(1) membership instead of repeated list scans
dup_index = {}
new_rows = []
for inst_row in clean_ge_rows:

    # add index to non-unique genes
    if inst_row in dup_set:

        # increment this gene's occurrence counter (first occurrence -> 1)
        dup_index[inst_row] = dup_index.get(inst_row, 0) + 1
        new_row = inst_row + '_' + str(dup_index[inst_row])

    else:
        new_row = inst_row

    new_rows.append(new_row)
In [ ]:
# Sanity check: suffixing should make all row names unique (counts match).
print(len(new_rows))
print(len(list(set(new_rows))))
In [ ]:
# NOTE(review): commented out — would apply the de-duplicated names to the
# expression matrices; df['ge-z'] is never assigned in this notebook.
# df['ge'].index = new_rows
# df['ge-z'].index = new_rows

Predict Cell Types using NM3337 Signatures

In [ ]:
# Strip the '_' suffix from narrow-signature row names so they match gene names.
rows = df['nct-sig'].index.tolist()
new_rows = [x.split('_')[0] for x in rows]
df['nct-sig'].index = new_rows
In [ ]:
df['nct-sig'].columns.tolist()
In [ ]:
# Same clean-up for the broad signatures.
rows = df['bct-sig'].index.tolist()
new_rows = [x.split('_')[0] for x in rows]
df['bct-sig'].index = new_rows
In [ ]:
# rows = df['ge-z'].index.tolist()
# new_rows = [x.split('_')[0] for x in rows]
# df['ge-z'].index = new_rows
In [ ]:
# Predict a cell-type category for each cell from the broad signature matrix.
# NOTE(review): df['ge-z'] is never assigned in this notebook (the assignment
# above is commented out) — this cell will fail on a fresh Restart & Run All;
# confirm the intended data source.
df['pred_cat'], df['sig_sim'], y_info = cby.predict_cats_from_sigs(df['ge-z'], df['bct-sig'], 
                                                                   predict_level='Cell Type', unknown_thresh=0.05)
In [ ]:
# Visualize predictions, highlighting the CD8 T-cell category in red.
net.load_df(df['pred_cat'])
net.set_cat_color(axis='col', cat_index=1, cat_name='Cell Type: T cells CD8', inst_color='red')
net.random_sample(axis='col', num_samples=250, random_state=99)
net.clip(lower=-5, upper=5)
net.cluster()
net.widget()
In [ ]:
# Copy the expression matrix so predicted categories can be attached to columns.
df['ge-cat'] = deepcopy(df['ge'])
df['ge-cat'].shape
In [ ]:
# transfer predicted categories to full dataset and add UMI count
cat_cols = df['pred_cat'].columns.tolist()
df['ge-cat'].columns = cat_cols

# new_cols = [(x[0], x[1], 'UMI: ' + str(barcode_umi_sum[x[0]])) for x in cat_cols]

df['ge-cat-umi'] = deepcopy(df['ge-cat'])
# df['ge-cat-umi'].columns = new_cols
# print(df['ge-cat-umi'].shape)
In [ ]:
# Cluster the category-labeled data (top 250 variable genes, 250 sampled cells);
# the z-scored intermediate is saved under 'tmp' for the similarity test below.
net.load_df(df['ge-cat-umi'])
net.set_cat_color(axis='col', cat_index=1, cat_name='Cell Type: T cells CD8', inst_color='red')
net.filter_N_top(inst_rc='row', N_top=250, rank_type='var')
net.random_sample(axis='col', num_samples=250, random_state=99)
net.normalize(axis='row', norm_type='zscore')
df['tmp'] = net.export_df()
net.clip(lower=-5, upper=5)
net.cluster()
net.widget()
In [ ]:
df['tmp'].shape

Make more general comparison

In [ ]:
# Compare similarity of cell pairs within vs. across predicted categories.
sim_dict, pval_dict = cby.sim_same_and_diff_category_samples(df['tmp'])
In [ ]:
# Mean similarity of same-category cell pairs
sim_dict['same'].mean()
In [ ]:
# Mean similarity of different-category cell pairs
sim_dict['diff'].mean()
In [ ]:
pval_dict

Signature Prediction

In [ ]:
%%time
# Re-derive signatures from the predicted labels, re-predict, and score the
# self-consistency of the predictions (fraction correct + F1 variants).
net.load_df(df['ge-cat-umi'])
net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var')
net.normalize(axis='row', norm_type='zscore')
df['ge-cat-umi-z'] = net.export_df() 

# Generate per-category signatures (genes passing the p-value cutoff)
df['cat-sig'], keep_genes, keep_genes_dict = cby.generate_signatures(
                                                          df['ge-cat-umi-z'], 
                                                          'Cell Type', pval_cutoff=0.05)

# Re-predict categories using the newly generated signatures
df['pred_cat'], df['sig_sim'], y_info = cby.predict_cats_from_sigs(df['ge-cat-umi-z'], 
                                                                   df['cat-sig'])

df['conf'], populations, ser_correct, fraction_correct = cby.confusion_matrix_and_correct_series(y_info)
print('fraction correct: ', fraction_correct)
print(f1_score(y_info['true'], y_info['pred'], average='macro'))
print(f1_score(y_info['true'], y_info['pred'], average='micro'))
print(f1_score(y_info['true'], y_info['pred'], average='weighted'))

Shuffle

In [ ]:
df['ge-cat-umi-z'].shape
In [ ]:
%%time
# Null model: repeat the signature/prediction pipeline with shuffled labels.
num_shuffles = 10
perform_ser = cby.compare_performance_to_shuffled_labels(df['ge-cat-umi-z'], 'Cell Type', 
                                                         num_shuffles=num_shuffles)
print('mean: ', perform_ser.mean(), 'std: ', perform_ser.std())
print('previously calc real performance: ', fraction_correct)
In [ ]:
# perform_ser
In [ ]:
# Distribution of shuffled-label performance
perform_ser.hist()
In [ ]:
# How many null standard deviations the real performance sits above the null mean
(fraction_correct - perform_ser.mean())/perform_ser.std()

Same-Cat vs Diff-Cat Sample Similarity

In [ ]:
# Rebuild a z-scored matrix (top 1000 variable genes) for the similarity test.
net.load_df(df['ge-cat-umi'])
net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var')
net.normalize(axis='row', norm_type='zscore')
# net.random_sample(axis='col', num_samples=750, random_state=99)
df['ge-small'] = net.export_df()
print(df['ge-small'].shape)
In [ ]:
%%time
sim_dict, pval_dict = cby.sim_same_and_diff_category_samples(df['ge-small'])
In [ ]:
# Histograms of pairwise similarities; shared bins/range so they are comparable
hist = {}
hist['diff'] = sim_dict['diff'].hist(bins=100, range=(-0.2,0.5))
In [ ]:
hist['same'] = sim_dict['same'].hist(bins=100, range=(-0.2,0.5))
In [ ]:
sim_dict['diff'].mean()
In [ ]:
sim_dict['same'].mean()

Calc ROC Curve

In [ ]:
from sklearn.metrics import roc_curve
sim_dict['same'].shape
In [ ]:
# Binary labels: 1 for same-category pairs, 0 for different-category pairs
true_index = list(np.ones(sim_dict['same'].shape[0]))
false_index = list(np.zeros(sim_dict['diff'].shape[0]))
y_true = true_index + false_index
print(len(y_true))
In [ ]:
# Scores for the ROC: the pairwise similarity values themselves.
# Series.get_values() was deprecated in pandas 0.25 and removed in 1.0;
# .tolist() returns the same plain-Python list of values.
true_val = sim_dict['same'].tolist()
false_val = sim_dict['diff'].tolist()
y_score = true_val + false_val
print(len(y_score))
In [ ]:
# ROC points over all similarity thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
In [ ]:
fpr.shape
In [ ]:
tpr.shape

Plot ROC Curve

In [ ]:
# ROC curve for separating same- vs different-category cell pairs.
# Bug fix: the original called plt.figure(figsize=(10,10)) AFTER plotting,
# which only created an empty second figure — the size must be set on the
# figure that is drawn on, before plotting.
plt.figure(figsize=(10, 10))
plt.plot(fpr, tpr, label='ROC')
plt.plot([0, 1], [0, 1], color='navy', linestyle='--', label='chance')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('Same- vs different-category pair ROC')
plt.legend();
In [ ]:
from sklearn.metrics import auc
In [ ]:
auc(fpr, tpr)
In [ ]: